import ipywidgets as widgets
import itertools as it
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import plotly.express as px
from ipywidgets import interact
from sklearn.metrics import roc_auc_score, average_precision_score
from sklearn.model_selection import RandomizedSearchCV
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelBinarizer
from sklearn.ensemble import IsolationForest
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
from sklearn.neighbors import KernelDensity
from tensorflow import keras
from tqdm import tqdm
from tfl_training_anomaly_detection.exercise_tools import (
evaluate,
get_kdd_data,
get_house_prices_data,
create_distributions,
contamination,
perform_rkde_experiment,
get_mnist_data
)
from tfl_training_anomaly_detection.vae import VAE, build_decoder_mnist, build_encoder_minst, build_contaminated_minst
%matplotlib inline
matplotlib.rcParams['figure.figsize'] = (5, 5)
MNIST is one of the most iconic data sets in the history of machine learning. It contains 70,000 grayscale images of handwritten digits, each of size $28\times 28$. Because of its moderate complexity and how readily it can be visualized, it is well suited to studying the behavior of machine learning algorithms in higher-dimensional spaces.
While it was originally created for classification (optical character recognition), we can build an anomaly detection data set from it by corrupting some of the images.
We first need to obtain the MNIST data set and prepare an anomaly detection set from it. Note that the data set is in row-vector format; therefore, we work with $28\times 28 = 784$-dimensional data points.
# Load the MNIST data set
mnist = get_mnist_data()
data = mnist['data']
print('data.shape: {}'.format(data.shape))
target = mnist['target'].astype(int)
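As a quick, purely illustrative check of the row-vector format: each sample can be reshaped losslessly between the $784$-dimensional vector view and the $28\times 28$ image view.
# Illustrative only: the row vectors are flattened 28x28 images
v = data[0]
assert v.shape == (784,)
assert np.array_equal(v.reshape(28, 28).reshape(-1), v)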
We prepared a function that does the job for us. It corrupts a prescribed portion of the data by introducing a rotation, additive noise, or a blackout of part of the image.
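To give an idea of what such corruptions might look like, here is a purely illustrative sketch with hypothetical helper functions; the actual corruption logic lives in build_contaminated_minst and may differ in detail.
from scipy.ndimage import rotate

def corrupt_rotate(img, angle=45):
    # rotate a (28, 28, 1) image around its center, keeping the shape
    return rotate(img, angle, axes=(0, 1), reshape=False)

def corrupt_noise(img, sigma=0.3):
    # add Gaussian pixel noise and clip back to [0, 1]
    return np.clip(img + np.random.normal(0, sigma, img.shape), 0, 1)

def corrupt_blackout(img, size=10):
    # zero out a random square patch of the image
    out = img.copy()
    r, c = np.random.randint(0, 28 - size, size=2)
    out[r:r + size, c:c + size] = 0
    return out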
First, we need to transform the data into image format.
X = data.reshape(-1, 28, 28, 1)/255
We will only corrupt the test set, so we perform the train-test split beforehand. We separate a relatively small test set so that we can use as much of the data as possible to obtain high-quality representations.
test_size = .1
X_train, X_test, target_train, target_test = train_test_split(X, target, test_size=test_size)
X_test, y_test = build_contaminated_minst(X_test)
# Visualize contamination
anomalies = X_test[y_test != 0]
selection = np.random.choice(len(anomalies), 25, replace=False)
fig, axes = plt.subplots(nrows=5, ncols=5, figsize=(5, 5))
for img, ax in zip(anomalies[selection], axes.flatten()):
    ax.imshow(img, 'gray')
    ax.axis('off')
plt.show()
Let us finally train a variational autoencoder (VAE). We replicate the model from the Keras documentation and apply it in a synthetic outlier detection scenario based on MNIST.
In the vae package we provide the implementation of the VAE. Please take a look at the source code to see how the minimization of the KL divergence is implemented.
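For a diagonal Gaussian encoder $q(z \mid x) = \mathcal{N}(\mu, \sigma^2 I)$ and a standard normal prior $p(z)$, the KL term has the closed form

$$D_{\mathrm{KL}}\big(q(z \mid x)\,\|\,p(z)\big) = -\frac{1}{2}\sum_j \left(1 + \log \sigma_j^2 - \mu_j^2 - \sigma_j^2\right).$$

Below is a minimal sketch of how this term is typically computed in a Keras-style VAE; the exact code in the vae package may differ.
import tensorflow as tf

def kl_term(z_mean, z_log_var):
    # closed-form KL between N(z_mean, exp(z_log_var) I) and the standard
    # normal prior, summed over latent dimensions, averaged over the batch
    kl = -0.5 * (1 + z_log_var - tf.square(z_mean) - tf.exp(z_log_var))
    return tf.reduce_mean(tf.reduce_sum(kl, axis=1))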
latent_dim = 3
vae = VAE(decoder=build_decoder_mnist(latent_dim=latent_dim), encoder=build_encoder_minst(latent_dim=latent_dim))
## Inspect encoder architecture
vae.encoder.summary()
## Inspect decoder architecture
vae.decoder.summary()
# train model
n_epochs = 30
vae.compile(optimizer=keras.optimizers.Adam(learning_rate=.001))
history = vae.fit(X_train, epochs=n_epochs, batch_size=128)
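Optionally, we can inspect the training curve. The metric key below follows the Keras VAE example that this model replicates; if the vae package tracks different names, adjust accordingly.
# Plot the total training loss per epoch (key name assumed from the Keras VAE example)
plt.plot(history.history['loss'])
plt.xlabel('epoch')
plt.ylabel('loss')
plt.show()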
def plot_latent_space(vae: VAE, n: int = 10, figsize: float = 10):
    """Plot sampled images from 2D slices of the latent space.

    @param vae: VAE model
    @param n: sample an n x n grid of images per slice
    @param figsize: figure size
    """
    for perm in [[0, 1, 2], [1, 2, 0], [2, 1, 0]]:
        # display an n*n 2D manifold of digits
        digit_size = 28
        scale = 1.0
        figure = np.zeros((digit_size * n, digit_size * n))
        # linearly spaced coordinates corresponding to the 2D plot
        # of digit classes in the latent space
        grid_x = np.linspace(-scale, scale, n)
        grid_y = np.linspace(-scale, scale, n)[::-1]
        for i, yi in enumerate(grid_y):
            for j, xi in enumerate(grid_x):
                # place xi and yi in the latent dimensions given by perm,
                # keeping the remaining dimension fixed at 0
                z_sample = np.zeros((1, 3))
                z_sample[0, perm[0]] = xi
                z_sample[0, perm[1]] = yi
                x_decoded = vae.decoder.predict(z_sample)
                digit = x_decoded[0].reshape(digit_size, digit_size)
                figure[
                    i * digit_size : (i + 1) * digit_size,
                    j * digit_size : (j + 1) * digit_size,
                ] = digit

        plt.figure(figsize=(figsize, figsize))
        start_range = digit_size // 2
        end_range = n * digit_size + start_range
        pixel_range = np.arange(start_range, end_range, digit_size)
        sample_range_x = np.round(grid_x, 1)
        sample_range_y = np.round(grid_y, 1)
        plt.xticks(pixel_range, sample_range_x)
        plt.yticks(pixel_range, sample_range_y)
        plt.xlabel("z[{}]".format(perm[0]))
        plt.ylabel("z[{}]".format(perm[1]))
        plt.gca().set_title('z[{}] = 0'.format(perm[2]))
        plt.imshow(figure, cmap="Greys_r")
        plt.show()
plot_latent_space(vae)
# Principal components of the latent representations
pca = PCA()
# the encoder returns (z_mean, z_log_var, z); index 2 is the sampled latent vector z
latents = vae.encoder.predict(X_train)[2]
pca.fit(latents)
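To see how much of the latent variance each principal direction captures, we can inspect the fitted PCA (a standard scikit-learn attribute):
# Fraction of latent-space variance captured by each principal component
print(pca.explained_variance_ratio_)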
# One slider per principal direction, each ranging over [-1, 1]
kwargs = {'x_{}'.format(i): (-1., 1.) for i in range(latent_dim)}

@widgets.interact(**kwargs)
def explore_latent_space(**kwargs):
    """Widget to explore the latent space along the principal axes, starting from the origin."""
    center_img = pca.transform(np.zeros([1, latent_dim]))
    latent_rep_pca = center_img + np.array([[kwargs[key] for key in kwargs]])
    latent_rep = pca.inverse_transform(latent_rep_pca)
    img = vae.decoder(latent_rep).numpy().reshape(28, 28)
    fig, ax = plt.subplots()
    ax.axis('off')
    ax.imshow(img, cmap='gray', vmin=0, vmax=1)
    plt.show()
# Latent representations of the training set, colored by digit class
latents = vae.encoder.predict(X_train)[2]
scatter = px.scatter_3d(x=latents[:, 0], y=latents[:, 1], z=latents[:, 2], color=target_train)
scatter.show()
# Latent representations of the contaminated test set, colored by contamination label
latents = vae.encoder.predict(X_test)[2]
scatter = px.scatter_3d(x=latents[:, 0], y=latents[:, 1], z=latents[:, 2], color=y_test)
scatter.show()
# Hold out a validation set from the contaminated test data for threshold selection
X_test, X_val, y_test, y_val = train_test_split(X_test, y_test)
# Compare a few validation images with their reconstructions
n_samples = 10
idx = np.random.choice(range(len(X_val)), n_samples, replace=False)
samples = X_val[idx]
fig, axes = plt.subplots(nrows=2, ncols=n_samples, figsize=(10, 2))
for img, ax_col in zip(samples, axes.T):
    # encode and decode; index 2 of the encoder output is the sampled latent vector
    x = vae.decoder.predict(vae.encoder.predict(img.reshape(1, 28, 28, 1))[2]).reshape(28, 28)
    diff = x - img.reshape(28, 28)
    error = (diff * diff).sum()
    ax_col[0].axis('off')
    ax_col[1].axis('off')
    ax_col[0].imshow(img, cmap='gray', vmin=0, vmax=1)
    ax_col[1].imshow(x, cmap='gray', vmin=0, vmax=1)
    ax_col[1].set_title('E={:.1f}'.format(error))
plt.tight_layout()
plt.show()
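The value $E$ above each reconstruction is the squared reconstruction error, which serves as our anomaly score:

$$E(x) = \lVert x - \hat{x}\rVert_2^2 = \sum_{i=1}^{784} \left(x_i - \hat{x}_i\right)^2,$$

where $\hat{x}$ is the decoder's reconstruction of the encoding of $x$. Corrupted images should be reconstructed poorly and hence receive high scores.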
# Binarize the labels: 0 = nominal, 1 = anomalous (any corruption type)
y_test_bin = y_test.copy()
y_test_bin[y_test != 0] = 1
y_val_bin = y_val.copy()
y_val_bin[y_val != 0] = 1
# Evaluate: squared reconstruction errors on the validation set
reconstruction = vae.decoder.predict(vae.encoder(X_val)[2])
rerrors = (reconstruction - X_val).reshape(-1, 28*28)
rerrors = (rerrors * rerrors).sum(axis=1)
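# Optional sanity check, not part of the original pipeline: threshold-free
# ranking metrics on the raw reconstruction errors, using the scorers
# already imported from sklearn.metrics above
if np.any(y_val_bin == 1):
    print('Validation ROC-AUC: {:.3f}'.format(roc_auc_score(y_val_bin, rerrors)))
    print('Validation AP: {:.3f}'.format(average_precision_score(y_val_bin, rerrors)))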
# Compute scores only if anomalies are present in the validation set
if np.any(y_val_bin == 1):
    results = evaluate(y_val_bin.astype(int), rerrors.astype(float))
    pr, rec, thr = results['PR']
    # F1 score for each candidate threshold on the precision-recall curve
    f1s = 2 * (pr * rec)[:-1] / (pr + rec)[:-1]
    threshold = thr[np.argmax(f1s)]
    print('Optimal threshold: {}'.format(threshold))

    reconstruction = vae.decoder.predict(vae.encoder(X_test)[2])
    reconstruction_error = (reconstruction - X_test).reshape(-1, 28*28)
    reconstruction_error = (reconstruction_error * reconstruction_error).sum(axis=1)
    classification = (reconstruction_error > threshold).astype(int)
    print('Precision: {}'.format(metrics.precision_score(y_test_bin, classification)))
    print('Recall: {}'.format(metrics.recall_score(y_test_bin, classification)))
    print('F1: {}'.format(metrics.f1_score(y_test_bin, classification)))
    print(metrics.confusion_matrix(y_test_bin, classification))
else:
    reconstruction_error = None

if reconstruction_error is not None:
    # sort test images by ascending reconstruction error
    combined = list(zip(X_test, reconstruction_error))
    combined.sort(key=lambda x: x[1])

if reconstruction_error is not None:
    # show the 100 test images with the highest reconstruction error
    n_rows = 10
    n_cols = 10
    n_samples = n_rows * n_cols
    samples = [c[0] for c in combined[-n_samples:]]
    fig, axes = plt.subplots(nrows=n_rows, ncols=n_cols, figsize=(2*n_cols, 2*n_rows))
    for img, ax in zip(samples, axes.reshape(-1)):
        ax.axis('off')
        ax.imshow(img.reshape((28, 28)), cmap='gray', vmin=0, vmax=1)
    plt.show()